{
#define MAX_CPU_ID 255
xc_physinfo_t info;
- char cpu_cap[128], *p=cpu_cap, *q=cpu_cap;
+ char cpu_cap[128], virt_caps[128], *p;
int i, j, max_cpu_id;
uint64_t free_heap;
PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
xc_cpu_to_node_t map[MAX_CPU_ID + 1];
+ const char *virtcap_names[] = { "hvm", "hvm_directio" };
set_xen_guest_handle(info.cpu_to_node, map);
info.max_cpu_id = MAX_CPU_ID;
if ( xc_physinfo(self->xc_handle, &info) != 0 )
return pyxc_error_to_exception();
- *q = 0;
+ p = cpu_cap;
+ *p = '\0';
for ( i = 0; i < sizeof(info.hw_cap)/4; i++ )
- {
p += sprintf(p, "%08x:", info.hw_cap[i]);
- if ( info.hw_cap[i] )
- q = p;
- }
- if ( q > cpu_cap )
- *(q-1) = 0;
+ *(p-1) = 0;
+
+ p = virt_caps;
+ *p = '\0';
+ for ( i = 0; i < 2; i++ )
+ if ( (info.capabilities >> i) & 1 )
+ p += sprintf(p, "%s ", virtcap_names[i]);
+ if ( p != virt_caps )
+ *(p-1) = '\0';
- ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s}",
+ ret_obj = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:l,s:l,s:l,s:i,s:s,s:s}",
"nr_nodes", info.nr_nodes,
"max_cpu_id", info.max_cpu_id,
"threads_per_core", info.threads_per_core,
"free_memory", pages_to_kib(info.free_pages),
"scrub_memory", pages_to_kib(info.scrub_pages),
"cpu_khz", info.cpu_khz,
- "hw_caps", cpu_cap);
+ "hw_caps", cpu_cap,
+ "virt_caps", virt_caps);
max_cpu_id = info.max_cpu_id;
if ( max_cpu_id > MAX_CPU_ID )
physinfo = self.physinfo_dict()
cpu_count = physinfo['nr_cpus']
cpu_features = physinfo['hw_caps']
+ virt_caps = physinfo['virt_caps']
# If the number of CPUs don't match, we should just reinitialise
# the CPU UUIDs.
self.cpus[u].update(
{ 'host' : self.uuid,
'features' : cpu_features,
+ 'virt_caps': virt_caps,
'speed' : int(float(cpuinfo[number]['cpu MHz'])),
'vendor' : cpuinfo[number]['vendor_id'],
'modelname': cpuinfo[number]['model name'],
'threads_per_core',
'cpu_mhz',
'hw_caps',
+ 'virt_caps',
'total_memory',
'free_memory',
'node_to_cpu',
xen_sysctl_physinfo_t *pi = &op->u.physinfo;
+ memset(pi, 0, sizeof(*pi));
pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
pi->cores_per_socket =
cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
pi->free_pages = avail_domheap_pages();
pi->scrub_pages = avail_scrub_pages();
pi->cpu_khz = local_cpu_data->proc_freq / 1000;
- memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
max_array_ent = pi->max_cpu_id;
pi->max_cpu_id = last_cpu(cpu_online_map);
{
xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
+ memset(pi, 0, sizeof(*pi));
pi->threads_per_core =
cpus_weight(cpu_sibling_map[0]);
pi->cores_per_socket =
pi->total_pages = total_pages;
pi->free_pages = avail_domheap_pages();
pi->cpu_khz = cpu_khz;
- memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
- ret = 0;
- if ( copy_to_guest(u_sysctl, sysctl, 1) )
- ret = -EFAULT;
+ ret = copy_to_guest(u_sysctl, sysctl, 1) ? -EFAULT : 0;
}
break;
if ( ret )
break;
+ memset(pi, 0, sizeof(*pi));
pi->threads_per_core =
cpus_weight(cpu_sibling_map[0]);
pi->cores_per_socket =
cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
pi->nr_cpus = (u32)num_online_cpus();
pi->nr_nodes = num_online_nodes();
- pi->total_pages = total_pages;
- pi->free_pages = avail_domheap_pages();
- pi->scrub_pages = avail_scrub_pages();
- pi->cpu_khz = cpu_khz;
- memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
+ pi->total_pages = total_pages;
+ pi->free_pages = avail_domheap_pages();
+ pi->scrub_pages = avail_scrub_pages();
+ pi->cpu_khz = cpu_khz;
memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+ if ( hvm_enabled )
+ pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
+ if ( iommu_enabled )
+ pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
max_array_ent = pi->max_cpu_id;
pi->max_cpu_id = last_cpu(cpu_online_map);
* Get physical information about the host machine
*/
#define XEN_SYSCTL_physinfo 3
+ /* (x86) The platform supports HVM guests. */
+#define _XEN_SYSCTL_PHYSCAP_hvm 0
+#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
+ /* (x86) The platform supports HVM-guest direct access to I/O devices. */
+#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
+#define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
struct xen_sysctl_physinfo {
- /* IN variables. */
uint32_t threads_per_core;
uint32_t cores_per_socket;
uint32_t nr_cpus;
uint64_aligned_t scrub_pages;
uint32_t hw_cap[8];
- /* IN/OUT variables. */
/*
* IN: maximum addressable entry in the caller-provided cpu_to_node array.
* OUT: largest cpu identifier in the system.
* elements of the array will not be written by the sysctl.
*/
XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
+
+ /* XEN_SYSCTL_PHYSCAP_??? */
+ uint32_t capabilities;
};
typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);